{
struct ir_ctrl *ir_ctrl;
u32 sts, gcmd;
+ unsigned long flags;
ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
ir_ctrl->iremap_maddr |=
ecap_ext_intr(iommu->ecap) ? (1 << IRTA_REG_EIME_SHIFT) : 0;
#endif
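+ /* IRTA programming and the SIRTPS wait below are done with register_lock held. */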
+ spin_lock_irqsave(&iommu->register_lock, flags);
+
/* set size of the interrupt remapping table */
ir_ctrl->iremap_maddr |= IRTA_REG_TABLE_SIZE;
dmar_writeq(iommu->reg, DMAR_IRTA_REG, ir_ctrl->iremap_maddr);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
(sts & DMA_GSTS_SIRTPS), sts);
-
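+ /* Drop the lock across the IEC flush below, which may take register_lock itself. */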
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
+
/* After setting SIRTP, the interrupt entry cache must be globally invalidated */
iommu_flush_iec_global(iommu);
+ spin_lock_irqsave(&iommu->register_lock, flags);
/* enable compatibility format interrupt pass through */
gcmd |= DMA_GCMD_CFI;
dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
(sts & DMA_GSTS_IRES), sts);
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
return init_apic_pin_2_ir_idx();
}
void disable_intremap(struct iommu *iommu)
{
u32 sts;
+ unsigned long flags;
ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
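+ /* Clear IRE in GCMD and wait for the status bit to drop, all under register_lock. */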
+ spin_lock_irqsave(&iommu->register_lock, flags);
sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_IRE));
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
!(sts & DMA_GSTS_IRES), sts);
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static void iommu_flush_write_buffer(struct iommu *iommu)
{
u32 val;
- unsigned long flag;
+ unsigned long flags;
if ( !rwbf_quirk && !cap_rwbf(iommu->cap) )
return;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ spin_lock_irqsave(&iommu->register_lock, flags);
val = dmar_readl(iommu->reg, DMAR_GSTS_REG);
dmar_writel(iommu->reg, DMAR_GCMD_REG, val | DMA_GCMD_WBF);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
!(val & DMA_GSTS_WBFS), val);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/* return value determines whether we need a write buffer flush */
{
struct iommu *iommu = (struct iommu *) _iommu;
u64 val = 0;
- unsigned long flag;
+ unsigned long flags;
/*
* In the non-present entry flush case, if hardware doesn't cache
}
val |= DMA_CCMD_ICC;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ spin_lock_irqsave(&iommu->register_lock, flags);
dmar_writeq(iommu->reg, DMAR_CCMD_REG, val);
/* Make sure hardware completes it */
IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, dmar_readq,
!(val & DMA_CCMD_ICC), val);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
/* flush context entry will implicitly flush write buffer */
return 0;
}
struct iommu *iommu = (struct iommu *) _iommu;
int tlb_offset = ecap_iotlb_offset(iommu->ecap);
u64 val = 0, val_iva = 0;
- unsigned long flag;
+ unsigned long flags;
/*
* In the non-present entry flush case, if hardware doesn't cache
if ( cap_write_drain(iommu->cap) )
val |= DMA_TLB_WRITE_DRAIN;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ spin_lock_irqsave(&iommu->register_lock, flags);
/* Note: Only uses first TLB reg currently */
if ( val_iva )
dmar_writeq(iommu->reg, tlb_offset, val_iva);
/* Make sure hardware completes it */
IOMMU_WAIT_OP(iommu, (tlb_offset + 8), dmar_readq,
!(val & DMA_TLB_IVT), val);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
/* check IOTLB invalidation granularity */
if ( DMA_TLB_IAIG(val) == 0 )
/* Make sure hardware completes it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
(sts & DMA_GSTS_TES), sts);
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
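+ /* Release the lock first: disable_pmr() may acquire register_lock itself. */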
/* Disable PMRs when VT-d engine takes effect per spec definition */
disable_pmr(iommu);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static void iommu_disable_translation(struct iommu *iommu)
void clear_fault_bits(struct iommu *iommu)
{
u64 val;
+ unsigned long flags;
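+ /* Fault-recording and fault-status registers are accessed under register_lock. */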
+ spin_lock_irqsave(&iommu->register_lock, flags);
val = dmar_readq(
iommu->reg,
cap_fault_reg_offset(dmar_readq(iommu->reg,DMAR_CAP_REG))+0x8);
dmar_writeq(
iommu->reg,
cap_fault_reg_offset(dmar_readq(iommu->reg,DMAR_CAP_REG))+8,
val);
dmar_writel(iommu->reg, DMAR_FSTS_REG, DMA_FSTS_FAULTS);
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int init_vtd_hw(void)
struct iommu_flush *flush = NULL;
int vector;
int ret;
+ unsigned long flags;
for_each_drhd_unit ( drhd )
{
dma_msi_data_init(iommu, iommu->vector);
dma_msi_addr_init(iommu, cpu_physical_id(first_cpu(cpu_online_map)));
clear_fault_bits(iommu);
+
+ spin_lock_irqsave(&iommu->register_lock, flags);
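+ /* Writing 0 to FECTL clears the interrupt mask (IM) bit and unmasks fault events. */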
dmar_writel(iommu->reg, DMAR_FECTL_REG, 0);
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
/* initialize flush functions */
flush = iommu_get_flush(iommu);
struct acpi_drhd_unit *drhd;
struct iommu *iommu;
u32 i;
+ unsigned long flags;
if ( !iommu_enabled )
return;
iommu = drhd->iommu;
i = iommu->index;
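+ /* Restore the saved fault-event interrupt registers under register_lock. */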
+ spin_lock_irqsave(&iommu->register_lock, flags);
dmar_writel(iommu->reg, DMAR_FECTL_REG,
(u32) iommu_state[i][DMAR_FECTL_REG]);
dmar_writel(iommu->reg, DMAR_FEDATA_REG,
(u32) iommu_state[i][DMAR_FEDATA_REG]);
dmar_writel(iommu->reg, DMAR_FEADDR_REG,
(u32) iommu_state[i][DMAR_FEADDR_REG]);
dmar_writel(iommu->reg, DMAR_FEUADDR_REG,
(u32) iommu_state[i][DMAR_FEUADDR_REG]);
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
iommu_enable_translation(iommu);
}
struct qi_ctrl *qi_ctrl;
struct iommu_flush *flush;
u32 sts;
+ unsigned long flags;
qi_ctrl = iommu_qi_ctrl(iommu);
flush = iommu_get_flush(iommu);
* to IQA register.
*/
qi_ctrl->qinval_maddr |= IQA_REG_QS;
+
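+ /* Program the invalidation queue address (IQA) and tail (IQT) under register_lock. */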
+ spin_lock_irqsave(&iommu->register_lock, flags);
dmar_writeq(iommu->reg, DMAR_IQA_REG, qi_ctrl->qinval_maddr);
dmar_writeq(iommu->reg, DMAR_IQT_REG, 0);
/* Make sure hardware completes it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
(sts & DMA_GSTS_QIES), sts);
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
qinval_enabled = 1;
return 0;
void disable_qinval(struct iommu *iommu)
{
u32 sts;
+ unsigned long flags;
ASSERT(ecap_queued_inval(iommu->ecap) && iommu_qinval);
+ spin_lock_irqsave(&iommu->register_lock, flags);
sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_QIE));
/* Make sure hardware completes it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
!(sts & DMA_GSTS_QIES), sts);
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
}